Using real data. For more information on how these data were obtained, go to: http://panthera:8888/notebooks/external_plugins/paper3code/%5BD.E%5DCell%20Extractor.ipynb

cd ../JSDM4POD/
/app/external_plugins/JSDM4POD
The following libraries are needed for fitting the model
import pandas as pd
import geopandas as gpd
import numpy as np
import scipy as sp
import patsy
import pystan
import pickle
import networkx as nt
import logging
import os
import sys, getopt
/opt/conda/envs/biospytial3/lib/python3.8/site-packages/geopandas/_compat.py:106: UserWarning: The Shapely GEOS version (3.8.0-CAPI-1.13.1 ) is incompatible with the GEOS version PyGEOS was compiled with (3.9.1-CAPI-1.14.2). Conversions between both will be slow. warnings.warn(
These libraries are used for visualising the posterior sample
# Visualisation stack: geoviews (with the bokeh backend) for interactive
# maps, cartopy for map projections, matplotlib for static figures.
import geoviews as gv
import geoviews.feature as gf
from cartopy import crs
import matplotlib.pyplot as plt
%matplotlib inline
plt.style.use('ggplot')
gv.extension('bokeh')
#,'matplotlib')
plt.rcParams['figure.figsize'] = [10, 10]
# Output directory for pickled posterior samples (used by the commented
# back-up snippet near the end of the notebook).
PATH = "/data/output/posterior_samps/"
PATH_BASE = ''
#PATH_HOME = PATH_BASE + 'paper3code/'
PATH_HOME = PATH_BASE + ''
sys.path.append(PATH_BASE)
import models  # project-local module providing the Stan model source code
logger = logging.getLogger('Multispecies_car-cs2')
logger.setLevel(logging.DEBUG)
import case_study2 as cs  # project-local helpers for this case study
# Input layers: gridded environmental / anthropic covariates (GeoPackage)
# and the pickled spatial lattice used for the CAR adjacency structure.
input_data_path = 'data/case-study2/casestudy2_grid128_fullstack_env_anth_extended_selowres.gpkg'
input_lattice_path = 'data/case-study2/lattice128.pkl-p2'
# Load the covariate GeoDataFrame together with the spatial adjacency matrix.
duple_df_mat = cs.prepareInputData(INPUT_DATA_FILE=input_data_path,INPUT_LATTICE_DATA=input_lattice_path)
INFO:Multispecies_car-cs2:Preparing input data ERROR:fiona._env:PROJ: proj_create_from_name: /opt/conda/envs/biospytial3/share/proj/proj.db lacks DATABASE.LAYOUT.VERSION.MAJOR / DATABASE.LAYOUT.VERSION.MINOR metadata. It comes from another PROJ installation. INFO:fiona.ogrext:Failed to auto identify EPSG: 7
# data: multi-indexed (cell id, taxon level) GeoDataFrame;
# M: sparse spatial adjacency matrix for the CAR prior.
data, M = duple_df_mat[1]
## TEST: REMOVE MISSING DATA (2.0)
##data_d.Y.replace(2.0,0.0,inplace=True)
The following line gives the structure of the dataframe.
# Preview the first rows to show the dataframe structure.
data.head(3)
| index | Y | taxon | Elevation_m | MeanTemp_m | Precipitation_m | SolarRadiation_m | Longitude | Latitude | Population_m | distance_to_road | cellid_ecor | wwf_mhtnam | wwf_mhtnum | cellid_ty | nom_mun | tipolgia14 | mode | oldY | geometry | ||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| id | level | ||||||||||||||||||||
| 1021 | 1 | 2 | 0.0 | Disocactus | -254.029167 | 16.595833 | 21.985043 | 18075.597222 | -117.007812 | 32.125 | 65.587155 | 889.976144 | 1021 | Mediterranean Forests, Woodlands and Scrub | 12 | 1021 | Playas de Rosarito | Metropolitano | 1.0 | 0.0 | POLYGON ((-117.14062 32.00000, -116.87500 32.0... |
| 1022 | 1 | 3 | 0.0 | Disocactus | 160.266667 | 17.280824 | 24.581583 | 18154.211601 | -117.007812 | 32.375 | 1029.079257 | 3515.856005 | 1022 | Mediterranean Forests, Woodlands and Scrub | 12 | 1022 | Playas de Rosarito | Metropolitano | 1.0 | 0.0 | POLYGON ((-117.14062 32.25000, -116.87500 32.2... |
| 1023 | 1 | 4 | 0.0 | Disocactus | 132.900000 | 17.855101 | 24.104762 | 18043.944689 | -117.007812 | 32.625 | 4284.286623 | 2836.496644 | 1023 | Mediterranean Forests, Woodlands and Scrub | 12 | 1023 | Tijuana | Metropolitano | NaN | 0.0 | POLYGON ((-117.14062 32.50000, -116.87500 32.5... |
Here we show the spatial observations of Disocactus (id = 1) on the level = 1
# Map of the response Y for the first taxon (index level 1 == 1).
data.xs(1,level=1).plot(edgecolor='black',column='Y')
<AxesSubplot:>
# Visual check of the adjacency structure (top-left 100x100 corner of M).
plt.imshow(M.todense()[:100,:100])
<matplotlib.image.AxesImage at 0x7ff67ccf3850>
Define the formulas for the ecological process ($P$) and the sampling effort ($S$). In this case the ecological process would be defined as:
$$\mathrm{logit}(P_y) = Elevation + Precipitation + MeanTemperature + G$$

$$\mathrm{logit}(S) = Ecoregions + Typology + G$$

Note:
Elevation, Precipitation and Mean Temperature will be standardized (i.e. $\frac{X - \mu_X}{\sigma_X}$ for $X \in$ {Elevation, Precipitation, Mean Temperature})
wwf_mhtnam is the name of the column corresponding to the Terrestrial Ecoregions, tipolgia14 is the name of the column corresponding to the settlement typology.
# Design formulas (patsy syntax).
# Ecological process: standardized continuous covariates, no intercept.
eco_formula = '~ standardize(Elevation_m) + standardize(Precipitation_m) +standardize(MeanTemp_m) -1 '
# Sampling effort: categorical ecoregion and settlement-typology effects.
sample_formula = '~ C(wwf_mhtnam) + C(tipolgia14)'
# Assemble the data dictionary consumed by the Stan model.
data_multilevel_CAR = cs.compileDataDict(
    data,
    M,
    sample_formula=sample_formula,
    eco_formula=eco_formula,
)
INFO:Multispecies_car-cs2:Compiling Data input for the STAN model INFO:Multispecies_car-cs2:Starting model for TerrEcoregions and Typology ...
logger.info("Compiling source code of the multispecies model")
# NOTE: multispecies_model first holds the Stan program source (a string)
# and is then rebound to the compiled pystan.StanModel object.
multispecies_model = models.multispeciesCARModel_stationaryBernoulliMissingData()
multispecies_model = pystan.StanModel(model_code=multispecies_model)
INFO:Multispecies_car-cs2:Compiling source code of the multispecies model INFO:pystan:COMPILING THE C++ CODE FOR MODEL anon_model_ed725f80ae68adc72b7571260fd40e9b NOW.
We will sample 1000 iterations (no thinning) across four chains using Hamiltonian Monte Carlo.
## Load model and compile
# Sampler configuration: 1000 iterations per chain (pystan uses the first
# half as warm-up), four chains, no thinning -> 2000 post-warmup draws.
NITERS = 1000
NCHAINS = 4
THINNING = 1
SEED = 12345
logger.info("Commencing posterior sampling through HMCM")
# adapt_delta is raised slightly above pystan's 0.8 default to reduce
# divergent transitions.
%time fit3 = multispecies_model.sampling(data=data_multilevel_CAR,
                                         iter=int(NITERS),
                                         chains=int(NCHAINS),
                                         thin=int(THINNING),
                                         control={'adapt_delta':0.81},
                                         seed=int(SEED)) #,'max_treedepth':15})
Calculate the summary statistics of the fixed effects (parameters beta), the mixed component between the ecological process $P_i$ and the sampling effort $S$, the spatial autocorrelation (alpha_car) and the overall variance (tau).
# Posterior summaries: fixed effects (beta), P/S mixing weights (alpha),
# spatial autocorrelation (alpha_car) and the global precision (tau).
for _par in ('beta', 'alpha', 'alpha_car', 'tau'):
    print(fit3.stansummary(pars=_par))
Inference for Stan model: anon_model_ed725f80ae68adc72b7571260fd40e9b.
4 chains, each with iter=1000; warmup=500; thin=1;
post-warmup draws per chain=500, total post-warmup draws=2000.
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
beta[1,1] 1.32 0.02 0.56 0.25 0.94 1.29 1.69 2.45 888 1.01
beta[2,1] 3.16 0.04 0.37 2.45 2.91 3.14 3.41 3.92 86 1.07
beta[3,1] 4.13 0.05 0.43 3.29 3.81 4.13 4.43 4.96 74 1.08
beta[4,1] 3.79 0.07 0.53 2.74 3.43 3.78 4.15 4.82 59 1.11
beta[5,1] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[1,2] 1.55 0.02 0.18 1.19 1.44 1.56 1.67 1.89 52 1.11
beta[2,2] -1.5 0.02 0.27 -2.05 -1.68 -1.5 -1.32 -0.98 265 1.02
beta[3,2] -2.35 0.04 0.41 -3.19 -2.61 -2.34 -2.07 -1.58 100 1.05
beta[4,2] -2.59 0.07 0.52 -3.6 -2.95 -2.59 -2.23 -1.6 61 1.08
beta[5,2] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[1,3] 0.16 0.02 0.55 -0.91 -0.2 0.15 0.52 1.29 1083 1.0
beta[2,3] 1.72 0.02 0.36 1.04 1.47 1.71 1.96 2.45 280 1.02
beta[3,3] 2.17 0.02 0.41 1.39 1.89 2.17 2.46 3.01 674 1.02
beta[4,3] 2.36 0.04 0.53 1.33 2.01 2.37 2.72 3.39 215 1.03
beta[5,3] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[1,4] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,4] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,4] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,4] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,4] 8.86 0.08 0.96 6.77 8.3 8.92 9.53 10.62 136 1.04
beta[1,5] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,5] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,5] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,5] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,5] 0.34 0.04 0.71 -1.05 -0.12 0.31 0.8 1.85 400 1.01
beta[1,6] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,6] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,6] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,6] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,6] 0.27 0.03 1.45 -2.61 -0.69 0.25 1.21 3.22 1767 1.0
beta[1,7] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,7] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,7] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,7] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,7] 1.46 0.04 0.75 0.06 0.95 1.42 1.94 3.06 451 1.0
beta[1,8] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,8] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,8] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,8] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,8] 1.77 0.04 0.99 -0.1 1.08 1.76 2.41 3.87 675 1.0
beta[1,9] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,9] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,9] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,9] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,9] 0.4 0.05 1.99 -4.23 -0.68 0.49 1.75 4.02 1444 1.0
beta[1,10] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,10] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,10] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,10] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,10] -0.16 0.04 0.71 -1.53 -0.63 -0.19 0.29 1.33 370 1.01
beta[1,11] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,11] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,11] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,11] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,11] 0.83 0.03 0.71 -0.49 0.36 0.79 1.29 2.35 415 1.01
beta[1,12] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,12] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,12] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,12] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,12] 1.48 0.05 1.16 -0.67 0.7 1.42 2.27 3.87 590 1.0
beta[1,13] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,13] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,13] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,13] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,13] -0.09 0.04 0.73 -1.48 -0.58 -0.1 0.38 1.44 368 1.01
beta[1,14] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,14] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,14] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,14] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,14] 0.57 0.02 0.68 -0.71 0.07 0.56 1.02 1.95 920 1.01
beta[1,15] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,15] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,15] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,15] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,15] 0.81 0.02 0.69 -0.49 0.34 0.8 1.28 2.2 916 1.01
beta[1,16] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,16] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,16] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,16] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,16] 0.4 0.03 0.67 -0.88 -0.06 0.39 0.86 1.79 590 1.01
beta[1,17] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,17] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,17] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,17] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,17] 0.59 0.03 0.68 -0.68 0.12 0.59 1.04 1.98 516 1.01
beta[1,18] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[2,18] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[3,18] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[4,18] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
beta[5,18] 1.07 0.02 0.69 -0.2 0.58 1.05 1.53 2.46 855 1.01
Samples were drawn using NUTS at Thu Jul 15 00:24:25 2021.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).
Inference for Stan model: anon_model_ed725f80ae68adc72b7571260fd40e9b.
4 chains, each with iter=1000; warmup=500; thin=1;
post-warmup draws per chain=500, total post-warmup draws=2000.
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
alpha[1,1] 0.77 8.4e-3 0.04 0.7 0.74 0.77 0.8 0.86 26 1.23
alpha[2,1] 0.53 6.6e-3 0.03 0.48 0.5 0.52 0.54 0.6 20 1.34
alpha[3,1] 0.76 8.0e-3 0.04 0.68 0.73 0.76 0.79 0.85 30 1.18
alpha[4,1] 0.83 5.8e-3 0.04 0.75 0.8 0.83 0.86 0.91 50 1.1
alpha[5,1] 0.0 nan 0.0 0.0 0.0 0.0 0.0 0.0 nan nan
alpha[1,2] 0.23 8.4e-3 0.04 0.14 0.2 0.23 0.26 0.3 26 1.23
alpha[2,2] 0.47 6.6e-3 0.03 0.4 0.46 0.48 0.5 0.52 20 1.34
alpha[3,2] 0.24 8.0e-3 0.04 0.15 0.21 0.24 0.27 0.32 30 1.18
alpha[4,2] 0.17 5.8e-3 0.04 0.09 0.14 0.17 0.2 0.25 50 1.1
alpha[5,2] 1.0 nan 0.0 1.0 1.0 1.0 1.0 1.0 nan nan
Samples were drawn using NUTS at Thu Jul 15 00:24:25 2021.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).
Inference for Stan model: anon_model_ed725f80ae68adc72b7571260fd40e9b.
4 chains, each with iter=1000; warmup=500; thin=1;
post-warmup draws per chain=500, total post-warmup draws=2000.
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
alpha_car 1.0 9.4e-8 1.3e-6 1.0 1.0 1.0 1.0 1.0 190 1.02
Samples were drawn using NUTS at Thu Jul 15 00:24:25 2021.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).
Inference for Stan model: anon_model_ed725f80ae68adc72b7571260fd40e9b.
4 chains, each with iter=1000; warmup=500; thin=1;
post-warmup draws per chain=500, total post-warmup draws=2000.
mean se_mean sd 2.5% 25% 50% 75% 97.5% n_eff Rhat
tau 3.75 0.22 0.88 2.42 3.04 3.69 4.26 5.74 16 1.19
Samples were drawn using NUTS at Thu Jul 15 00:24:25 2021.
For each parameter, n_eff is a crude measure of effective sample size,
and Rhat is the potential scale reduction factor on split chains (at
convergence, Rhat=1).
# Drop coordinate columns so they do not collide when the posterior
# summaries are concatenated back onto the data frame below.
data_dd= data.drop(['Latitude', 'Longitude'],axis=1)
#Extract P and S
# Latent fields from the fit: P (ecological process), S (sampling effort),
# G (shared spatial random effect), Q (mixed latent component).
results = fit3.extract(pars=['P','S','G','Q'])
#results = fit_continue.extract(pars=['P','S','G','Q'])
# NOTE(review): this rebinds the alias `sp` (scipy, imported at the top
# of the notebook) to scipy.special for the rest of the session.
import scipy.special as sp
from holoviews import opts
P = results['P']
S = results['S']
G = results['G']
Q = results['Q']
# Transpose draws x units -> units x draws so quantiles run per unit.
P = pd.DataFrame(P.T)
Q = pd.DataFrame(Q.T)
G = pd.DataFrame(G.T)
S = pd.DataFrame(S.T)
quant = 0.5
#P_median = sp.expit(P.quantile(q=quant,axis=1))
#Q_median = sp.expit(Q.quantile(q=quant,axis=1))
#G_median = G.quantile(q=quant,axis=1)
#S_median = sp.expit(S.quantile(q=quant,axis=1))
#P_median = P.mean(axis=1)
# Posterior medians per unit (the expit transform is left commented out,
# so these presumably stay on the latent scale - TODO confirm).
P_median = P.quantile(q=0.5,axis=1)
Q_median = Q.quantile(q=0.5,axis=1)
G_median = G.quantile(q=0.5,axis=1)
S_median = S.quantile(q=0.5,axis=1)
# P/Q vary per (cell, taxon); G/S are shared across taxa, hence indexed
# by the level-1 slice only.
medians_levels = pd.concat([P_median,Q_median],axis=1)
medians_s = pd.concat([G_median,S_median],axis=1)
medians_levels.index = data_dd.index
medians_levels.columns = ['Pm','Qm']
medians_s.index = data.xs(1,level=1).index
medians_s.columns = ['Gm','Sm']
new_l = gpd.GeoDataFrame(pd.concat([medians_levels,data_dd],axis=1))
new_s = gpd.GeoDataFrame(pd.concat([medians_s,data_dd.xs(1,level=1)],axis=1))
#data = data_d
# Per-taxon slices: observed presences (Y > 0) and fitted results.
%time taxa_df = list(map(lambda i : data.xs(i,level=1)[data.Y.xs(i,level=1) > 0],range(1,6)))
%time taxa_results = list(map(lambda i : new_l.xs(i,level=1),range(1,6)))
CPU times: user 158 ms, sys: 3.72 ms, total: 162 ms Wall time: 162 ms CPU times: user 84.6 ms, sys: 0 ns, total: 84.6 ms Wall time: 84.5 ms
# Taxon names read from a single grid cell (15566).
# NOTE(review): assumes that cell contains records for all taxa in level
# order - verify against the data.
tnames = list(data_dd['taxon'].xs(15566))
## Mesophyl
# Per-taxon figure, taxon index 0. Red dots mark sampled cells taken from
# taxa_df[4] (presumably the sampling-effort proxy taxon - TODO confirm);
# black crosses mark presences of this taxon.
i = 0
group_name = tnames[i]
label_p = "Ecological Suitability for "
label_q = "Mixed latent Q(x) for "
complement_df = taxa_df[4]
presences_df = taxa_df[i]
#pines_pt = gTDF[gTDF.Abies > 0]
#complement_pt = taxa_df[1]
#### Change accordingly
width = 600
height = 650
sample_pt = gv.Points((complement_df.Longitude,complement_df.Latitude),group=group_name,label='Sample').opts(
    fill_color = 'red',
    line_color = 'grey',
    line_width = 0.2,
    line_alpha = 0.9,
    fill_alpha = 0.6,
    size = 2,
)
presence_pt = gv.Points((presences_df.Longitude,presences_df.Latitude),group=group_name,label='Presence').opts(
    marker = 'x',
    color = 'black')
obs = (sample_pt * presence_pt)
cmap = plt.cm.viridis
#cmap = plt.cm.seismic
# Choropleths of the posterior medians Pm and Qm for this taxon.
P = gv.Polygons(taxa_results[i],vdims=['Pm','Qm'],group=group_name,label="Ecological Suitability P(x)")
Q = gv.Polygons(taxa_results[i],vdims=['Qm','Pm'],group=group_name,label="Mixing effect Q(x)")
latent_pres_fig = ((P * obs).relabel(label_p) + (Q * obs).relabel(label_q))
#latent_pres_fig = ((P).relabel(lm) + (Q ).relabel(lm))
#clim = (-4,4)
#clim = (0,1)
# Shared styling for both panels; last expression so the figure renders.
latent_pres_fig.opts(
    opts.Polygons(
        line_width=0.001,
        line_alpha = 0.2,
        tools=['hover'],
        width = width,
        height = height,
        colorbar = True,
        #clim = clim,
        colorbar_position = 'bottom',
        alpha = 1.0,
        cmap = cmap,),
    #merge_tools=False
)
# Repeats the per-taxon visualisation (see the i = 0 cell) for taxon 1.
i = 1
group_name = tnames[i]
label_p = "Ecological Suitability for "
label_q = "Mixed latent Q(x) for "
complement_df = taxa_df[4]
presences_df = taxa_df[i]
#pines_pt = gTDF[gTDF.Abies > 0]
#complement_pt = taxa_df[1]
#### Change accordingly
width = 600
height = 650
sample_pt = gv.Points((complement_df.Longitude,complement_df.Latitude),group=group_name,label='Sample').opts(
    fill_color = 'red',
    line_color = 'grey',
    line_width = 0.2,
    line_alpha = 0.9,
    fill_alpha = 0.6,
    size = 2,
)
presence_pt = gv.Points((presences_df.Longitude,presences_df.Latitude),group=group_name,label='Presence').opts(
    marker = 'x',
    color = 'black')
obs = (sample_pt * presence_pt)
cmap = plt.cm.viridis
P = gv.Polygons(taxa_results[i],vdims=['Pm','Qm'],group=group_name,label="Ecological Suitability P(x)")
Q = gv.Polygons(taxa_results[i],vdims=['Qm','Pm'],group=group_name,label="Mixing effect Q(x)")
latent_pres_fig = ((P * obs).relabel(label_p) + (Q * obs).relabel(label_q))
#latent_pres_fig = ((P).relabel(lm) + (Q ).relabel(lm))
#clim = (-4,4)
latent_pres_fig.opts(
    opts.Polygons(
        line_width=0.001,
        line_alpha = 0.2,
        tools=['hover'],
        width = width,
        height = height,
        colorbar = True,
        #clim = clim,
        colorbar_position = 'bottom',
        alpha = 1.0,
        cmap = cmap,),
    #merge_tools=False
)
# Repeats the per-taxon visualisation (see the i = 0 cell) for taxon 2.
i = 2
group_name = tnames[i]
label_p = "Ecological Suitability for "
label_q = "Mixed latent Q(x) for "
complement_df = taxa_df[4]
presences_df = taxa_df[i]
#pines_pt = gTDF[gTDF.Abies > 0]
#complement_pt = taxa_df[1]
#### Change accordingly
width = 600
height = 650
sample_pt = gv.Points((complement_df.Longitude,complement_df.Latitude),group=group_name,label='Sample').opts(
    fill_color = 'red',
    line_color = 'grey',
    line_width = 0.2,
    line_alpha = 0.9,
    fill_alpha = 0.6,
    size = 2,
)
presence_pt = gv.Points((presences_df.Longitude,presences_df.Latitude),group=group_name,label='Presence').opts(
    marker = 'x',
    color = 'black')
obs = (sample_pt * presence_pt)
cmap = plt.cm.viridis
P = gv.Polygons(taxa_results[i],vdims=['Pm','Qm'],group=group_name,label="Ecological Suitability P(x)")
Q = gv.Polygons(taxa_results[i],vdims=['Qm','Pm'],group=group_name,label="Mixing effect Q(x)")
latent_pres_fig = ((P * obs).relabel(label_p) + (Q * obs).relabel(label_q))
#latent_pres_fig = ((P).relabel(lm) + (Q ).relabel(lm))
#clim = (-4,4)
latent_pres_fig.opts(
    opts.Polygons(
        line_width=0.001,
        line_alpha = 0.2,
        tools=['hover'],
        width = width,
        height = height,
        colorbar = True,
        #clim = clim,
        colorbar_position = 'bottom',
        alpha = 1.0,
        cmap = cmap,),
    #merge_tools=False
)
# Repeats the per-taxon visualisation (see the i = 0 cell) for taxon 3.
i = 3
group_name = tnames[i]
label_p = "Ecological Suitability for "
label_q = "Mixed latent Q(x) for "
complement_df = taxa_df[4]
presences_df = taxa_df[i]
#pines_pt = gTDF[gTDF.Abies > 0]
#complement_pt = taxa_df[1]
#### Change accordingly
width = 600
height = 650
sample_pt = gv.Points((complement_df.Longitude,complement_df.Latitude),group=group_name,label='Sample').opts(
    fill_color = 'red',
    line_color = 'grey',
    line_width = 0.2,
    line_alpha = 0.9,
    fill_alpha = 0.6,
    size = 2,
)
presence_pt = gv.Points((presences_df.Longitude,presences_df.Latitude),group=group_name,label='Presence').opts(
    marker = 'x',
    color = 'black')
obs = (sample_pt * presence_pt)
cmap = plt.cm.viridis
P = gv.Polygons(taxa_results[i],vdims=['Pm','Qm'],group=group_name,label="Ecological Suitability P(x)")
Q = gv.Polygons(taxa_results[i],vdims=['Qm','Pm'],group=group_name,label="Mixing effect Q(x)")
latent_pres_fig = ((P * obs).relabel(label_p) + (Q * obs).relabel(label_q))
#latent_pres_fig = ((P).relabel(lm) + (Q ).relabel(lm))
#clim = (-4,4)
latent_pres_fig.opts(
    opts.Polygons(
        line_width=0.001,
        line_alpha = 0.2,
        tools=['hover'],
        width = width,
        height = height,
        colorbar = True,
        #clim = clim,
        colorbar_position = 'bottom',
        alpha = 1.0,
        cmap = cmap,),
    #merge_tools=False
)
# Repeats the per-taxon visualisation (see the i = 0 cell) for taxon 4.
i = 4
group_name = tnames[i]
label_p = "Ecological Suitability for "
label_q = "Mixed latent Q(x) for "
complement_df = taxa_df[4]
presences_df = taxa_df[i]
#pines_pt = gTDF[gTDF.Abies > 0]
#complement_pt = taxa_df[1]
#### Change accordingly
width = 600
height = 650
sample_pt = gv.Points((complement_df.Longitude,complement_df.Latitude),group=group_name,label='Sample').opts(
    fill_color = 'red',
    line_color = 'grey',
    line_width = 0.2,
    line_alpha = 0.9,
    fill_alpha = 0.6,
    size = 2,
)
presence_pt = gv.Points((presences_df.Longitude,presences_df.Latitude),group=group_name,label='Presence').opts(
    marker = 'x',
    color = 'black')
obs = (sample_pt * presence_pt)
cmap = plt.cm.viridis
#cmap = plt.cm.RdBu
#cmap = plt.cm.Blues
P = gv.Polygons(taxa_results[i],vdims=['Pm','Qm'],group=group_name,label="Ecological Suitability P(x)")
Q = gv.Polygons(taxa_results[i],vdims=['Qm','Pm'],group=group_name,label="Mixing effect Q(x)")
latent_pres_fig = ((P * obs).relabel(label_p) + (Q * obs).relabel(label_q))
#latent_pres_fig = ((P).relabel(lm) + (Q ).relabel(lm))
#clim = (-4,4)
# NOTE(review): clim is assigned here but the clim option below is
# commented out, so this assignment has no effect on the figure.
clim = (0,1)
latent_pres_fig.opts(
    opts.Polygons(
        line_width=0.001,
        line_alpha = 0.2,
        tools=['hover'],
        width = width,
        height = height,
        colorbar = True,
        #clim = clim,
        colorbar_position = 'bottom',
        alpha = 1.0,
        cmap = cmap,),
    #merge_tools=False
)
#logger.info("Sampling finished")
#logger.info("Initiating model and posterior sampling back-up through pickling method using protocol -1")
#_file = PATH + FILE_POST_SAMP
#with open(_file,"wb") as f:
# logger.info("Saving pickled file")
# pickle.dump({'model':multispecies_model, 'fit': fit3},f, protocol=-1)
# logger.info("End!, model and posterior sample have been returned by this main function")
# return(multispecies_model,fit3)
## Uncomment to read the saved STAN OBJECT
## Read Stan object from disk
#import pickle
# Change name accordingly
#_file = "/outputs/presence_only_models/multispecies_model.pkl"
#with open(_file, "rb") as f:
# data_list = pickle.load(f)
#fit3 = data_dict['fit']
# fit = data_list[1]
## Continue after burnin
# reuse tuned parameters
# Warm restart: reuse the step size and inverse metric tuned during the
# first run and disable adaptation, so the extra iterations continue the
# chains rather than re-warming up.
stepsize = fit3.get_stepsize()
# by default .get_inv_metric returns a list
inv_metric = fit3.get_inv_metric(as_dict=True)
init = fit3.get_last_position()
# increment seed by 1
#seed2 = seed + 1
control = {"stepsize" : stepsize,
           "inv_metric" : inv_metric,
           "adapt_engaged" : False
}
%time fit_continue = multispecies_model.sampling(data=data_multilevel_CAR,iter=100,chains=4,control=control,init=init)#seed=seed,) #,'max_treedepth':15})
import pickle
# Restore a previously fitted model/fit pair from disk.
# NOTE(security): pickle.load can execute arbitrary code - only load
# files produced by a trusted run.
with open("/mnt/data1/outputs/from_hec/multispecies_m.pkl", "rb") as f:
    data_dict = pickle.load(f)
    # or with a list
    # data_list = pickle.load(f)
fit = data_dict['fit']
# fit = data_list[1]
# Summaries of the reloaded fit: fixed effects, mixing weights, spatial
# autocorrelation and global precision, printed in that order.
for parameter in ('beta', 'alpha', 'alpha_car', 'tau'):
    print(fit.stansummary(pars=parameter))